In [2]:
# A lot of code was borrowed from https://www.pyimagesearch.com/2016/01/11/opencv-panorama-stitching/

import cv2
import numpy as np
import imutils
from matplotlib import pyplot as plt

IMG_HEIGHT = 3480
IMG_WIDTH = 4640

def extendCorners(p1, p2):
    """Extend the line through p1 and p2 to the left and right image edges.

    Returns the two points where the line crosses x = 0 and x = IMG_WIDTH,
    with the y values truncated to int.
    """
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    left_y = p1[1] - slope * p1[0]
    right_y = p2[1] + slope * (IMG_WIDTH - p2[0])
    return (0, int(left_y)), (IMG_WIDTH, int(right_y))

def warpImage(img, corners, target):
    """Perspective-warp the quadrilateral `corners` of `img` onto `target`.

    Both `corners` and `target` are float32 arrays of four (x, y) points;
    the warp is rendered on an IMG_WIDTH x IMG_HEIGHT canvas.
    """
    transform = cv2.getPerspectiveTransform(corners, target)
    return cv2.warpPerspective(img, transform, (IMG_WIDTH, IMG_HEIGHT))

def detectAndDescribe(image):
    """Detect SIFT keypoints in `image` and compute their descriptors.

    Parameters
    ----------
    image : input image (ndarray).

    Returns
    -------
    (kps, features) : kps is a float32 array of (x, y) keypoint
        coordinates, features the matching descriptor matrix.
    """
    # SIFT moved from the xfeatures2d contrib module into the main cv2
    # namespace in OpenCV 4.4 (patent expiry); support both layouts.
    if hasattr(cv2, "SIFT_create"):
        descriptor = cv2.SIFT_create()
    else:
        descriptor = cv2.xfeatures2d.SIFT_create()
    (kps, features) = descriptor.detectAndCompute(image, None)

    # convert the keypoints from cv2.KeyPoint objects to a plain NumPy
    # array of (x, y) coordinates
    kps = np.float32([kp.pt for kp in kps])

    # return a tuple of keypoints and features
    return (kps, features)

def sparsifyKeyPoints(kpsA, kpsB, matches, sparsity):
    """Thin out matches so at most `sparsity` of them fall in any
    100-pixel horizontal band of image A.

    Parameters
    ----------
    kpsA : keypoint coordinates of image A; kpsA[i][1] is the y value.
    kpsB : keypoint coordinates of image B (unused; kept so the call
        signature stays compatible with matchKeypoints).
    matches : list of (trainIdx, queryIdx) pairs; queryIdx indexes kpsA.
    sparsity : maximum number of retained matches per 100-px band.

    Returns
    -------
    Filtered list of (trainIdx, queryIdx) pairs, original order preserved.
    """
    # Count accepted matches per band.  A dict keyed by band index
    # replaces the original fixed np.zeros(1000): no arbitrary height
    # limit, and no silent wrap-around for negative bin indices.
    # (Also dropped the no-op `sparsity = sparsity` line.)
    count = {}
    matches_sparse = []
    for (trainIdx, queryIdx) in matches:
        binIndex = int(kpsA[queryIdx][1] / 100)
        if count.get(binIndex, 0) < sparsity:
            count[binIndex] = count.get(binIndex, 0) + 1
            matches_sparse.append((trainIdx, queryIdx))
    return matches_sparse

def matchKeypoints(kpsA, kpsB, featuresA, featuresB,
    ratio, reprojThresh, sparsity):
    """Match descriptors between two images and estimate a homography.

    Parameters
    ----------
    kpsA, kpsB : (x, y) keypoint coordinate arrays for images A and B.
    featuresA, featuresB : descriptor matrices for images A and B.
    ratio : Lowe's ratio-test threshold (best vs. second-best distance).
    reprojThresh : RANSAC reprojection error threshold, in pixels.
    sparsity : per-band match cap forwarded to sparsifyKeyPoints.

    Returns
    -------
    (matches, H, status) with H mapping A-points into B's frame, or
    None when fewer than 4 matches survive filtering.
    """
    # compute the raw matches: the two nearest neighbours of every
    # descriptor in A among the descriptors of B
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
        # keep only matches that pass Lowe's ratio test: the best
        # neighbour must be clearly better than the runner-up
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))

    # limit match density per horizontal band of the image
    matches = sparsifyKeyPoints(kpsA, kpsB, matches, sparsity)

    # computing a homography requires at least 4 correspondences;
    # the original tested `> 4`, needlessly rejecting the 4-match case
    if len(matches) >= 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])

        # robustly estimate the homography with RANSAC
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
            reprojThresh)

        # return the matches along with the homography matrix
        # and the inlier status of each matched point
        return (matches, H, status)

    # otherwise, no homography could be computed
    return None

def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
    """Render imageA and imageB side by side, with each RANSAC-inlier
    match drawn as a green line between its two keypoints."""
    heightA, widthA = imageA.shape[:2]
    heightB, widthB = imageB.shape[:2]

    # canvas wide enough for both images, tall enough for the taller one
    vis = np.zeros((max(heightA, heightB), widthA + widthB, 3), dtype="uint8")
    vis[0:heightA, 0:widthA] = imageA
    vis[0:heightB, widthA:] = imageB

    # draw a line for every match the homography estimation accepted
    for (trainIdx, queryIdx), inlier in zip(matches, status):
        if inlier == 1:
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + widthA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 10)

    # return the visualization
    return vis

def stitch(images, ratio=0.75, reprojThresh=6.0,
        showMatches=False, sparsity=10):
    """Estimate the homography mapping the second image of `images`
    into the frame of the first.

    Parameters mirror matchKeypoints; when showMatches is True, the
    inlier matches are also visualised via drawMatches/showImage.
    Returns the 3x3 homography H, or None when too few matches exist.
    """
    # unpack the pair, then detect keypoints and extract local
    # invariant descriptors from each image
    (imageB, imageA) = images
    (kpsA, featuresA) = detectAndDescribe(imageA)
    (kpsB, featuresB) = detectAndDescribe(imageB)

    # match features between the two images
    M = matchKeypoints(kpsA, kpsB,
        featuresA, featuresB, ratio, reprojThresh, sparsity)

    # None means not enough matched keypoints for a homography
    if M is None:
        return None

    (matches, H, status) = M
    if showMatches:
        # optional visual sanity check of the surviving matches
        vis = drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
        showImage(vis)
    return H
    
def showImage(img):
    """Display `img` in a large (15x15 inch) matplotlib figure,
    without pixel interpolation."""
    fig, ax = plt.subplots(figsize=(15, 15))
    ax.imshow(img, interpolation='none')
    plt.show()
In [10]:
# load the five source photographs (1.jpg ... 5.jpg) and preview them
imgs = []
for index in range(1, 6):
    imgs.append(cv2.imread("%d.jpg" % index))

for img in imgs:
    showImage(img)

1) Skew correction

Homography

In [63]:
# reference figure: the form of the 3x3 homography matrix
img = cv2.imread("homographyMatrix.png")
showImage(img)
In [64]:
# Each entry holds two hand-measured points on the top edge followed by
# two on the bottom edge of the region of interest in each photograph.
point_sets_measured = [
    [[897, 340], [4632, 302], [901, 3187], [4627, 3337]],
    [[53, 506], [4616, 512], [69, 3398], [4603, 3343]],
    [[39, 170], [4593, 121], [36, 3250], [4604, 3267]],
    [[19, 161], [4351, 83], [21, 3281], [4540, 3280]],
    [[31, 249], [3632, 142], [32, 3196], [3554, 3235]],
]

# extend each measured top and bottom line out to the canvas edges
point_sets = []
for top_a, top_b, bot_a, bot_b in point_sets_measured:
    top_left, top_right = extendCorners(top_a, top_b)
    bot_left, bot_right = extendCorners(bot_a, bot_b)
    point_sets.append((top_left, top_right, bot_left, bot_right))
In [65]:
# overlay the extended top and bottom lines on a copy of each image
for img, points in zip(imgs, point_sets):
    annotated = img.copy()
    cv2.line(annotated, points[0], points[1], (0, 255, 0), 10)
    cv2.line(annotated, points[2], points[3], (0, 255, 0), 10)
    showImage(annotated)
In [66]:
# target coordinates: map each measured quadrilateral onto the full canvas
corner_coords = np.float32([[0, 0],
                            [IMG_WIDTH, 0],
                            [0, IMG_HEIGHT],
                            [IMG_WIDTH, IMG_HEIGHT]])

# perform the skew correction on every photograph
imgs_normalized = []
for src_img, point_set in zip(imgs, point_sets):
    original_coords = np.float32(point_set)
    imgs_normalized.append(warpImage(src_img, original_coords, corner_coords))
In [67]:
# preview the skew-corrected (normalized) images
for img in imgs_normalized:
    showImage(img)

2) Feature detection and matching

a) SIFT

i) Scale space extrema detection

In [4]:
# illustration image for the scale-space extrema detection step
img = cv2.imread("sift_scale.jpg")
showImage(img)

ii) Keypoint localization

Harris corner detector

iii) Orientation assignment

Gradient of local area around keypoint is calculated

In [5]:
# illustration image for the keypoint orientation assignment step
img = cv2.imread("sift_orientation.jpg")
showImage(img)

iv) Keypoint descriptor

Orientation is calculated for each 4x4 square in 16x16 area around keypoint, then binned

In [12]:
# illustration images for the keypoint descriptor step
img = cv2.imread("sift_descriptor.jpg")
showImage(img)
img = cv2.imread("sift_descriptor2.jpg")
showImage(img)

b) Match features

  1. K-nearest neighbour
  2. Perform Lowe's ratio test

c) RANdom SAmpling Consensus (RANSAC)

In [68]:
# illustration image for RANSAC inlier/outlier selection
img = cv2.imread("ransac.jpg")
showImage(img)
In [69]:
# Estimate the homographies between consecutive normalized images.
# H_array[i] holds the transform for the pair (i, i+1); the pair (1, 2)
# is skipped here and handled in the next cell with a different
# sparsity setting.  (Slot 4 of H_array is never used.)
H_array = np.zeros((5,3,3))
for i in [0,2,3]:
    H = stitch((imgs_normalized[i], imgs_normalized[i+1]), showMatches=True, sparsity=10)
    H_array[i] = H
In [70]:
# Pair (1, 2) is processed separately with a higher per-band match cap
# (sparsity=14) — presumably the default of 10 gave a poor homography
# for this pair; confirm by inspecting the match visualisation.
for i in [1]:
    H = stitch((imgs_normalized[i], imgs_normalized[i+1]), showMatches=True, sparsity=14)
    H_array[i] = H

3) Transformation

In [71]:
# Shift each image 500 px down on a taller canvas so the warped
# panorama is not cut off at the top edge.
imgs_shifted = []
for img_norm in imgs_normalized:
    canvas = np.zeros((IMG_HEIGHT + 1000, IMG_WIDTH, 3), dtype=np.uint8)
    # Proper 2-D slice assignment.  The original chained indexing
    # ([500:IMG_HEIGHT+500][0:IMG_WIDTH]) only worked by accident: the
    # second slice re-indexed the ROWS of the first view (all of them,
    # since it had fewer than IMG_WIDTH rows), not the columns.
    canvas[500:IMG_HEIGHT + 500, 0:IMG_WIDTH] = img_norm
    imgs_shifted.append(canvas)

# Stitch right to left: repeatedly warp the running result into the
# frame of the next image to the left, then paste that image on top.
result = imgs_shifted[4]
for i in range(3, -1, -1):
    result = cv2.warpPerspective(result, H_array[i], (IMG_WIDTH*4, IMG_HEIGHT+2000))
    result[0:imgs_shifted[i].shape[0], 0:imgs_shifted[i].shape[1]] = imgs_shifted[i]
showImage(result)
cv2.imwrite("result.jpg", result)
Out[71]:
True
In [ ]: